bitkeeper revision 1.1389.19.3 (42832ff7ACb43Qx1ZO4faAq_Dh0ClA)
author     kaf24@firebug.cl.cam.ac.uk <kaf24@firebug.cl.cam.ac.uk>
           Thu, 12 May 2005 10:29:11 +0000 (10:29 +0000)
committer  kaf24@firebug.cl.cam.ac.uk <kaf24@firebug.cl.cam.ac.uk>
           Thu, 12 May 2005 10:29:11 +0000 (10:29 +0000)
Make shadow_lock() acquire the recursive per-domain BIGLOCK. The change is
easily reverted via the "#if 0" at the top of shadow.h. It also fixes a problem
with nested shadow_lock() calls: nesting is okay because BIGLOCK is nestable.
Signed-off-by: Keir Fraser <keir@xensource.com>
xen/arch/x86/shadow.c
xen/include/asm-x86/domain.h
xen/include/asm-x86/shadow.h
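
The sketch below is not part of the patch; it models the new locking scheme in
standalone user-space C. A plain depth counter stands in for the recursive
per-domain BIGLOCK, and shadow_nest tracks shadow_lock() nesting in the same
way as the new macros in shadow.h. The mock_domain type and the
lock_biglock()/unlock_biglock() helpers are illustrative names only, not Xen
APIs.

#include <assert.h>
#include <stdio.h>

struct mock_domain {
    int big_lock_depth;   /* stand-in for the recursive per-domain BIGLOCK */
    int shadow_nest;      /* recursive depth of shadow_lock() nesting      */
};

static void lock_biglock(struct mock_domain *d)
{
    d->big_lock_depth++;  /* a recursive lock may be re-taken by its holder */
}

static void unlock_biglock(struct mock_domain *d)
{
    assert(d->big_lock_depth > 0);
    d->big_lock_depth--;
}

static int shadow_lock_is_acquired(struct mock_domain *d)
{
    return (d->big_lock_depth > 0) && (d->shadow_nest != 0);
}

static void shadow_lock(struct mock_domain *d)
{
    lock_biglock(d);      /* nested callers no longer deadlock or trip an ASSERT */
    d->shadow_nest++;
}

static void shadow_unlock(struct mock_domain *d)
{
    assert(shadow_lock_is_acquired(d));
    d->shadow_nest--;
    unlock_biglock(d);
}

int main(void)
{
    struct mock_domain d = { 0, 0 };

    shadow_lock(&d);                      /* outer code path            */
    shadow_lock(&d);                      /* nested code path: now okay */
    assert(shadow_lock_is_acquired(&d));
    shadow_unlock(&d);
    shadow_unlock(&d);
    assert(!shadow_lock_is_acquired(&d));
    printf("nested shadow_lock()/shadow_unlock() balanced\n");
    return 0;
}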

diff --git a/xen/arch/x86/shadow.c b/xen/arch/x86/shadow.c
index 3aa12ee5ea4ff1c8505278aab440488515867420..4dbfa6b02d9146cb14fed8a1bc8f7dbea4b739f8 100644
--- a/xen/arch/x86/shadow.c
+++ b/xen/arch/x86/shadow.c
@@ -1217,7 +1217,7 @@ static int shadow_mode_table_op(
     int               i, rc = 0;
     struct exec_domain *ed;
 
-    ASSERT(spin_is_locked(&d->arch.shadow_lock));
+    ASSERT(shadow_lock_is_acquired(d));
 
     SH_VLOG("shadow mode table op %lx %lx count %d",
             pagetable_val(d->exec_domain[0]->arch.guest_table),  /* XXX SMP */
@@ -1813,7 +1813,7 @@ shadow_mark_mfn_out_of_sync(struct exec_domain *ed, unsigned long gpfn,
     struct pfn_info *page = &frame_table[mfn];
     struct out_of_sync_entry *entry = shadow_alloc_oos_entry(d);
 
-    ASSERT(spin_is_locked(&d->arch.shadow_lock));
+    ASSERT(shadow_lock_is_acquired(d));
     ASSERT(pfn_valid(mfn));
 
 #ifndef NDEBUG
@@ -1943,7 +1943,7 @@ int __shadow_out_of_sync(struct exec_domain *ed, unsigned long va)
     l2_pgentry_t l2e;
     unsigned long l1pfn, l1mfn;
 
-    ASSERT(spin_is_locked(&d->arch.shadow_lock));
+    ASSERT(shadow_lock_is_acquired(d));
     ASSERT(VALID_M2P(l2pfn));
 
     perfc_incrc(shadow_out_of_sync_calls);
@@ -2127,7 +2127,7 @@ int shadow_remove_all_write_access(
     u32 found = 0, fixups, write_refs;
     unsigned long prediction, predicted_gpfn, predicted_smfn;
 
-    ASSERT(spin_is_locked(&d->arch.shadow_lock));
+    ASSERT(shadow_lock_is_acquired(d));
     ASSERT(VALID_MFN(readonly_gmfn));
 
     perfc_incrc(remove_write_access);
@@ -2245,7 +2245,7 @@ u32 shadow_remove_all_access(struct domain *d, unsigned long forbidden_gmfn)
     if ( unlikely(!shadow_mode_enabled(d)) )
         return 0;
 
-    ASSERT(spin_is_locked(&d->arch.shadow_lock));
+    ASSERT(shadow_lock_is_acquired(d));
     perfc_incrc(remove_all_access);
 
     for (i = 0; i < shadow_ht_buckets; i++)
@@ -2287,7 +2287,7 @@ static int resync_all(struct domain *d, u32 stype)
     int unshadow;
     int changed;
 
-    ASSERT(spin_is_locked(&d->arch.shadow_lock));
+    ASSERT(shadow_lock_is_acquired(d));
 
     for ( entry = d->arch.out_of_sync; entry; entry = entry->next)
     {
@@ -2485,7 +2485,7 @@ void __shadow_sync_all(struct domain *d)
 
     perfc_incrc(shadow_sync_all);
 
-    ASSERT(spin_is_locked(&d->arch.shadow_lock));
+    ASSERT(shadow_lock_is_acquired(d));
 
     // First, remove all write permissions to the page tables
     //
diff --git a/xen/include/asm-x86/domain.h b/xen/include/asm-x86/domain.h
index 683ba2ac0ce1a9193e3f6e6280951bfc0ae8567e..86c806d2fd69961321eaf3cd1058f964bbc4b1ea 100644
--- a/xen/include/asm-x86/domain.h
+++ b/xen/include/asm-x86/domain.h
@@ -30,7 +30,7 @@ struct arch_domain
 
     /* Shadow mode status and controls. */
     unsigned int shadow_mode;  /* flags to control shadow table operation */
-    spinlock_t   shadow_lock;
+    unsigned int shadow_nest;  /* Recursive depth of shadow_lock() nesting */
     /* Shadow mode has tainted page reference counts? */
     unsigned int shadow_tainted_refcnts;
 
diff --git a/xen/include/asm-x86/shadow.h b/xen/include/asm-x86/shadow.h
index 202453194f2b9d878b7e5d2d252c0761389787c7..5ba7d72bbb8685f3583bd980e92e394146aba5e8 100644
--- a/xen/include/asm-x86/shadow.h
+++ b/xen/include/asm-x86/shadow.h
 #define __linear_hl2_table ((l1_pgentry_t *)(LINEAR_PT_VIRT_START + \
      (PERDOMAIN_VIRT_START >> (L2_PAGETABLE_SHIFT - L1_PAGETABLE_SHIFT))))
 
-#define shadow_lock_init(_d) spin_lock_init(&(_d)->arch.shadow_lock)
-#define shadow_lock(_d)      do { ASSERT(!spin_is_locked(&(_d)->arch.shadow_lock)); spin_lock(&(_d)->arch.shadow_lock); } while (0)
-#define shadow_unlock(_d)    spin_unlock(&(_d)->arch.shadow_lock)
+/*
+ * For now we use the per-domain BIGLOCK rather than a shadow-specific lock.
+ * We usually have the BIGLOCK already acquired anyway, so this is unlikely
+ * to cause much unnecessary extra serialisation. Also it's a recursive
+ * lock, and there are some code paths containing nested shadow_lock().
+ * The "#if 0"-ed code below is therefore broken until such nesting is removed.
+ */
+#if 0
+#define shadow_lock_init(_d)                    \
+    spin_lock_init(&(_d)->arch.shadow_lock)
+#define shadow_lock_is_acquired(_d)             \
+    spin_is_locked(&(_d)->arch.shadow_lock)
+#define shadow_lock(_d)                         \
+do {                                            \
+    ASSERT(!shadow_lock_is_acquired(_d));       \
+    spin_lock(&(_d)->arch.shadow_lock);         \
+} while (0)
+#define shadow_unlock(_d)                       \
+do {                                            \
+    ASSERT(shadow_lock_is_acquired(_d));        \
+    spin_unlock(&(_d)->arch.shadow_lock);       \
+} while (0)
+#else
+#define shadow_lock_init(_d)                    \
+    ((_d)->arch.shadow_nest = 0)
+#define shadow_lock_is_acquired(_d)             \
+    (spin_is_locked(&(_d)->big_lock) && ((_d)->arch.shadow_nest != 0))
+#define shadow_lock(_d)                         \
+do {                                            \
+    LOCK_BIGLOCK(_d);                           \
+    (_d)->arch.shadow_nest++;                   \
+} while (0)
+#define shadow_unlock(_d)                       \
+do {                                            \
+    ASSERT(shadow_lock_is_acquired(_d));        \
+    (_d)->arch.shadow_nest--;                   \
+    UNLOCK_BIGLOCK(_d);                         \
+} while (0)
+#endif
 
 #define SHADOW_ENCODE_MIN_MAX(_min, _max) ((((L1_PAGETABLE_ENTRIES - 1) - (_max)) << 16) | (_min))
 #define SHADOW_MIN(_encoded) ((_encoded) & ((1u<<16) - 1))
@@ -403,7 +439,7 @@ static inline int __mark_dirty(struct domain *d, unsigned int mfn)
     unsigned long pfn;
     int           rc = 0;
 
-    ASSERT(spin_is_locked(&d->arch.shadow_lock));
+    ASSERT(shadow_lock_is_acquired(d));
     ASSERT(d->arch.shadow_dirty_bitmap != NULL);
 
     if ( !VALID_MFN(mfn) )
@@ -1137,7 +1173,7 @@ static inline unsigned long __shadow_status(
                           ? __gpfn_to_mfn(d, gpfn)
                           : INVALID_MFN);
 
-    ASSERT(spin_is_locked(&d->arch.shadow_lock));
+    ASSERT(shadow_lock_is_acquired(d));
     ASSERT(gpfn == (gpfn & PGT_mfn_mask));
     ASSERT(stype && !(stype & ~PGT_type_mask));
 
@@ -1186,7 +1222,7 @@ shadow_max_pgtable_type(struct domain *d, unsigned long gpfn,
     struct shadow_status *x;
     u32 pttype = PGT_none, type;
 
-    ASSERT(spin_is_locked(&d->arch.shadow_lock));
+    ASSERT(shadow_lock_is_acquired(d));
     ASSERT(gpfn == (gpfn & PGT_mfn_mask));
 
     perfc_incrc(shadow_max_type);
@@ -1280,7 +1316,7 @@ static inline void delete_shadow_status(
     struct shadow_status *p, *x, *n, *head;
     unsigned long key = gpfn | stype;
 
-    ASSERT(spin_is_locked(&d->arch.shadow_lock));
+    ASSERT(shadow_lock_is_acquired(d));
     ASSERT(!(gpfn & ~PGT_mfn_mask));
     ASSERT(stype && !(stype & ~PGT_type_mask));
 
@@ -1362,7 +1398,7 @@ static inline void set_shadow_status(
 
     SH_VVLOG("set gpfn=%lx gmfn=%lx smfn=%lx t=%lx", gpfn, gmfn, smfn, stype);
 
-    ASSERT(spin_is_locked(&d->arch.shadow_lock));
+    ASSERT(shadow_lock_is_acquired(d));
 
     ASSERT(shadow_mode_translate(d) || gpfn);
     ASSERT(!(gpfn & ~PGT_mfn_mask));